Add one more parameter to __vmread_vcpu and clean some code.
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 8 Oct 2005 08:54:06 +0000 (09:54 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 8 Oct 2005 08:54:06 +0000 (09:54 +0100)
Signed-off-by: Xin Xiaohui <Xiaohui.xin@intel.com>
Signed-off-by: Li Chengyuan <Chengyuan.li@intel.com>
Signed-off-by: Nakajima Jun <jun.nakajima@intel.com>
xen/arch/x86/vmx.c
xen/arch/x86/vmx_io.c
xen/include/asm-x86/vmx.h

index 0b81d09f3c477edd788789ab9006659b3055cfeb..28d996e4a5a98e21bf767e2f9c2de4fa8dbcfbbf 100644 (file)
@@ -425,12 +425,13 @@ static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
 static void vmx_do_no_device_fault(void)
 {
     unsigned long cr0;
+    struct vcpu *v = current;
 
     clts();
     setup_fpu(current);
-    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
+    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
     if (!(cr0 & X86_CR0_TS)) {
-        __vmread_vcpu(GUEST_CR0, &cr0);
+        __vmread_vcpu(v, GUEST_CR0, &cr0);
         cr0 &= ~X86_CR0_TS;
         __vmwrite(GUEST_CR0, cr0);
     }
@@ -1347,6 +1348,7 @@ static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs
 {
     unsigned int gp, cr;
     unsigned long value;
+    struct vcpu *v = current;
 
     switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
     case TYPE_MOV_TO_CR:
@@ -1369,17 +1371,17 @@ static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs
         clts();
         setup_fpu(current);
 
-        __vmread_vcpu(GUEST_CR0, &value);
+        __vmread_vcpu(v, GUEST_CR0, &value);
         value &= ~X86_CR0_TS; /* clear TS */
         __vmwrite(GUEST_CR0, value);
 
-        __vmread_vcpu(CR0_READ_SHADOW, &value);
+        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
         value &= ~X86_CR0_TS; /* clear TS */
         __vmwrite(CR0_READ_SHADOW, value);
         break;
     case TYPE_LMSW:
         TRACE_VMEXIT(1,TYPE_LMSW);
-        __vmread_vcpu(CR0_READ_SHADOW, &value);
+        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         return vmx_set_cr0(value);
index 67cb7739f8e034853abbc2ece63f48e81da3677e..8809d2170425f470743ed328ed07061c71e2fc14 100644 (file)
@@ -891,7 +891,7 @@ asmlinkage void vmx_intr_assist(void)
     struct vcpu *v = current;
 
     highest_vector = find_highest_pending_irq(v, &intr_type);
-    __vmread_vcpu(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
+    __vmread_vcpu(v, CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
 
     if (highest_vector == -1) {
         disable_irq_window(cpu_exec_control);
index cdbb5d0b3f67088b7db10a6e00c4899ac80fae7d..c1ba8fd436a06d61f55dba0fa8f9bc4afe51f923 100644 (file)
@@ -314,10 +314,8 @@ static always_inline int ___vmread (const unsigned long field,  void *ptr, const
 }
 
 
-static always_inline void __vmwrite_vcpu(unsigned long field, unsigned long value)
+static always_inline void __vmwrite_vcpu(struct vcpu *v, unsigned long field, unsigned long value)
 {
-    struct vcpu *v = current;
-
     switch(field) {
     case CR0_READ_SHADOW:
        v->arch.arch_vmx.cpu_shadow_cr0 = value;
@@ -334,10 +332,8 @@ static always_inline void __vmwrite_vcpu(unsigned long field, unsigned long valu
     }
 }
 
-static always_inline void __vmread_vcpu(unsigned long field, unsigned long *value)
+static always_inline void __vmread_vcpu(struct vcpu *v, unsigned long field, unsigned long *value)
 {
-    struct vcpu *v = current;
-
     switch(field) {
     case CR0_READ_SHADOW:
        *value = v->arch.arch_vmx.cpu_shadow_cr0;
@@ -352,24 +348,15 @@ static always_inline void __vmread_vcpu(unsigned long field, unsigned long *valu
        printk("__vmread_cpu: invalid field %lx\n", field);
        break;
     }
-
-   /* 
-    * __vmwrite() can be used for non-current vcpu, and it's possible that
-    * the vcpu field is not initialized at that case.
-    * 
-    */
-    if (!*value) {
-       __vmread(field, value);
-       __vmwrite_vcpu(field, *value);
-    }
 }
 
 static inline int __vmwrite (unsigned long field, unsigned long value)
 {
     unsigned long eflags;
+    struct vcpu *v = current;
 
     __asm__ __volatile__ ( VMWRITE_OPCODE
-                           MODRM_EAX_ECX       
+                           MODRM_EAX_ECX
                            :
                            : "a" (field) , "c" (value)
                            : "memory");
@@ -381,7 +368,7 @@ static inline int __vmwrite (unsigned long field, unsigned long value)
     case CR0_READ_SHADOW:
     case GUEST_CR0:
     case CPU_BASED_VM_EXEC_CONTROL:
-       __vmwrite_vcpu(field, value);
+       __vmwrite_vcpu(v, field, value);
        break;
     }
 
@@ -437,13 +424,14 @@ static inline int __vmxon (u64 addr)
 static inline void vmx_stts(void)
 {
     unsigned long cr0;
+    struct vcpu *v = current;
 
-    __vmread_vcpu(GUEST_CR0, &cr0);
+    __vmread_vcpu(v, GUEST_CR0, &cr0);
     if (!(cr0 & X86_CR0_TS)) {
         __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
     }
 
-    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
+    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
     if (!(cr0 & X86_CR0_TS))
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
 }
@@ -453,7 +441,7 @@ static inline int vmx_paging_enabled(struct vcpu *v)
 {
     unsigned long cr0;
 
-    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
+    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
     return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
 }